return;
}
-static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
+static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir, u64 pte)
{
- unsigned long iot;
- iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);
+ unsigned long iot = pte & GPFN_IO_MASK;
perfc_incra(vmx_mmio_access, iot >> 56);
switch (iot) {
/*
dir 1: read 0:write
*/
-void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
+void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma, u64 pte)
{
REGS *regs;
IA64_BUNDLE bundle;
}
if (vcpu->domain->arch.is_sioemu) {
- unsigned long iot = __gpfn_is_io(vcpu->domain, padr >> PAGE_SHIFT);
+ unsigned long iot = pte & GPFN_IO_MASK;
if (iot != GPFN_PIB && iot != GPFN_IOSAPIC) {
sioemu_io_emulate(padr, data, data1, update_word);
}
if (size == 4) {
- mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir);
+ mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir, pte);
size = 3;
}
- mmio_access(vcpu, padr, &data, 1 << size, ma, dir);
+ mmio_access(vcpu, padr, &data, 1 << size, ma, dir, pte);
emulate_io_update(vcpu, update_word, data, data1);
}
#ifdef VTLB_DEBUG
int index;
#endif
- u64 gpfn;
+ u64 gpfn, gpte;
u64 ps, va, rid;
thash_data_t * p_dtr;
+
ps = itir_ps(itir);
va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
if (ps != _PAGE_SIZE_16M)
thash_purge_entries(vcpu, va, ps);
gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
- if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
+ gpte = lookup_domain_mpa(vcpu->domain, gpfn, NULL);
+ if (gpte & GPFN_IO_MASK)
pte |= VTLB_PTE_IO;
vcpu_get_rr(vcpu, va, &rid);
- rid = rid& RR_RID_MASK;
+ rid &= RR_RID_MASK;
p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
mmu_mode = VMX_MMU_MODE(v);
if ((mmu_mode == VMX_MMU_PHY_DT
|| (mmu_mode == VMX_MMU_PHY_D && type == DSIDE_TLB))
- && !((vadr<<1)>>62)) {
+ && (REGION_NUMBER(vadr) & 3) == 0) {
if (type == DSIDE_TLB) {
+ u64 pte;
/* DTLB miss. */
if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
return vmx_handle_lds(regs);
+ pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
/* Clear UC bit in vadr with the shifts. */
- if (v->domain != dom0
- && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
- emulate_io_inst(v, ((vadr << 1) >> 1), 4);
+ if (v->domain != dom0 && (pte & GPFN_IO_MASK)) {
+ emulate_io_inst(v, pa_clear_uc(vadr), 4, pte);
return IA64_FAULT;
}
}
if (data != 0) {
/* Found. */
if (v->domain != dom0 && type == DSIDE_TLB) {
+ u64 pte;
if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
return vmx_handle_lds(regs);
}
gppa = (vadr & ((1UL << data->ps) - 1)) +
(data->ppn >> (data->ps - 12) << data->ps);
- if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
+ pte = lookup_domain_mpa(v->domain, gppa, NULL);
+ if (pte & GPFN_IO_MASK) {
if (misr.sp)
panic_domain(NULL, "ld.s on I/O page not with UC attr."
" pte=0x%lx\n", data->page_flags);
if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
- emulate_io_inst(v, gppa, data->ma);
+ emulate_io_inst(v, gppa, data->ma, pte);
else {
vcpu_set_isr(v, misr.val);
data_access_rights(v, vadr);
#define __gmfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
-/* Return I/O type if trye */
-#define __gpfn_is_io(_d, gpfn) \
-({ \
- u64 pte, ret=0; \
- pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL); \
- ret = (pte != INVALID_MFN) ? pte & GPFN_IO_MASK : 0; \
- ret; \
-})
-
-#define __gpfn_is_mem(_d, gpfn) \
-({ \
- u64 pte, ret=0; \
- pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL); \
- ret = (pte != INVALID_MFN) && (pte & GPFN_IO_MASK) == GPFN_MEM; \
- ret; \
-})
-
-
#define __gpa_to_mpa(_d, gpa) \
((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
extern void free_domain_tlb(struct vcpu *v);
extern thash_data_t * vhpt_lookup(u64 va);
extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
-extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma);
+extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma, u64 pte);
extern void emulate_io_update(struct vcpu *vcpu, u64 word, u64 d, u64 d1);
extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
#define virt_to_xenva(va) ((unsigned long)va - PAGE_OFFSET - \
xen_pstart + KERNEL_START)
+/*
+ * Clear bit 63 (the UC attribute bit in ia64 physical addresses).
+ * Shifting left then right on an unsigned 64-bit value drops the top
+ * bit; relies on paddr being u64 so the right shift is logical.
+ */
+static inline u64 pa_clear_uc(u64 paddr)
+{
+	return (paddr << 1) >> 1;
+}
#undef __pa
#undef __va